         }
     }
-    /* Need to hand off device-model MMIO and writes to read-only
-     * memory to the device model */
-    if ( p2mt == p2m_mmio_dm
-         || (p2mt == p2m_ram_ro && ft == ft_demand_write) )
+    /* Need to hand off device-model MMIO to the device model */
+    if ( p2mt == p2m_mmio_dm )
     {
         gpa = guest_walk_to_gpa(&gw);
         goto mmio;
     }
+    /* Log attempts to write to read-only memory */
+    if ( (p2mt == p2m_ram_ro) && (ft == ft_demand_write) )
+    {
+        static unsigned long lastpage = 0;
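+        /* xchg() hands back the page logged last time, so each distinct
+         * faulting page is reported once rather than on every fault. */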
+        if ( xchg(&lastpage, va & PAGE_MASK) != (va & PAGE_MASK) )
+            gdprintk(XENLOG_DEBUG, "guest attempted write to read-only memory"
+                     " page. va page=%#lx, mfn=%#lx\n",
+                     va & PAGE_MASK, mfn_x(gmfn));
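+        /* Emulating the instruction discards the write (the mapping
+         * code returns MAPPING_SILENT_FAIL for read-only RAM), so the
+         * guest continues past it. */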
+        goto emulate; /* skip over the instruction */
+    }
+
     /* In HVM guests, we force CR0.WP always to be set, so that the
      * pagetables are always write-protected. If the guest thinks
      * CR0.WP is clear, we must emulate faulting supervisor writes to
 /* Translate a VA to an MFN, injecting a page-fault if we fail */
 #define BAD_GVA_TO_GFN (~0UL)
 #define BAD_GFN_TO_MFN (~1UL)
+#define READONLY_GFN   (~2UL)
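+/* None of these sentinel values is a valid MFN, so mfn_valid() fails
+ * on all of them and callers can tell the failure cases apart. */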
 static mfn_t emulate_gva_to_mfn(struct vcpu *v,
                                 unsigned long vaddr,
                                 struct sh_emulate_ctxt *sh_ctxt)
     /* Translate the GFN to an MFN */
     mfn = gfn_to_mfn(v->domain, _gfn(gfn), &p2mt);
-    if ( p2m_is_ram(p2mt) )
-    {
-        ASSERT(mfn_valid(mfn));
-        v->arch.paging.last_write_was_pt = !!sh_mfn_is_a_page_table(mfn);
-        return mfn;
-    }
-
-    return _mfn(BAD_GFN_TO_MFN);
+    if ( p2mt == p2m_ram_ro )
+        return _mfn(READONLY_GFN);
+    if ( !p2m_is_ram(p2mt) )
+        return _mfn(BAD_GFN_TO_MFN);
+
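+    /* Normal RAM: record whether the target is a shadowed pagetable,
+     * then hand the MFN back for mapping. */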
+    ASSERT(mfn_valid(mfn));
+    v->arch.paging.last_write_was_pt = !!sh_mfn_is_a_page_table(mfn);
+    return mfn;
 }
 /* Check that the user is allowed to perform this write.
- * Returns a mapped pointer to write to, or NULL for error. */
-#define MAPPING_UNHANDLEABLE ((void *)0)
-#define MAPPING_EXCEPTION    ((void *)1)
-#define emulate_map_dest_failed(rc) ((unsigned long)(rc) <= 1)
+ * Returns a mapped pointer to write to, or one of the MAPPING_*
+ * failure codes. */
+#define MAPPING_UNHANDLEABLE ((void *)(unsigned long)X86EMUL_UNHANDLEABLE)
+#define MAPPING_EXCEPTION    ((void *)(unsigned long)X86EMUL_EXCEPTION)
+#define MAPPING_SILENT_FAIL  ((void *)(unsigned long)X86EMUL_OKAY)
+#define emulate_map_dest_failed(rc) ((unsigned long)(rc) <= 3)
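+/* The X86EMUL_* codes are small integers (0-3 here), well below any
+ * address emulate_map_dest() can return, so a failed "pointer" doubles
+ * as the emulator return code.  MAPPING_SILENT_FAIL is X86EMUL_OKAY:
+ * the write is dropped and emulation reports success. */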
 static void *emulate_map_dest(struct vcpu *v,
                               unsigned long vaddr,
                               u32 bytes,
     sh_ctxt->mfn1 = emulate_gva_to_mfn(v, vaddr, sh_ctxt);
     if ( !mfn_valid(sh_ctxt->mfn1) )
         return ((mfn_x(sh_ctxt->mfn1) == BAD_GVA_TO_GFN) ?
-                MAPPING_EXCEPTION : MAPPING_UNHANDLEABLE);
+                MAPPING_EXCEPTION :
+                (mfn_x(sh_ctxt->mfn1) == READONLY_GFN) ?
+                MAPPING_SILENT_FAIL : MAPPING_UNHANDLEABLE);
     /* Unaligned writes mean probably this isn't a pagetable */
     if ( vaddr & (bytes - 1) )
                                            sh_ctxt);
         if ( !mfn_valid(sh_ctxt->mfn2) )
             return ((mfn_x(sh_ctxt->mfn2) == BAD_GVA_TO_GFN) ?
-                    MAPPING_EXCEPTION : MAPPING_UNHANDLEABLE);
+                    MAPPING_EXCEPTION :
+                    (mfn_x(sh_ctxt->mfn2) == READONLY_GFN) ?
+                    MAPPING_SILENT_FAIL : MAPPING_UNHANDLEABLE);
         /* Cross-page writes mean probably not a pagetable */
         sh_remove_shadows(v, sh_ctxt->mfn2, 0, 0 /* Slow, can fail */ );
     addr = emulate_map_dest(v, vaddr, bytes, sh_ctxt);
     if ( emulate_map_dest_failed(addr) )
-        return ((addr == MAPPING_EXCEPTION) ?
-                X86EMUL_EXCEPTION : X86EMUL_UNHANDLEABLE);
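+        /* The MAPPING_* failure codes are X86EMUL_* values, so they
+         * can be handed straight back to the emulator. */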
+        return (long)addr;
     shadow_lock(v->domain);
     memcpy(addr, src, bytes);
     addr = emulate_map_dest(v, vaddr, bytes, sh_ctxt);
     if ( emulate_map_dest_failed(addr) )
-        return ((addr == MAPPING_EXCEPTION) ?
-                X86EMUL_EXCEPTION : X86EMUL_UNHANDLEABLE);
+        return (long)addr;
     shadow_lock(v->domain);
     switch ( bytes )
     addr = emulate_map_dest(v, vaddr, 8, sh_ctxt);
     if ( emulate_map_dest_failed(addr) )
-        return ((addr == MAPPING_EXCEPTION) ?
-                X86EMUL_EXCEPTION : X86EMUL_UNHANDLEABLE);
+        return (long)addr;
     old = (((u64) old_hi) << 32) | (u64) old_lo;
     new = (((u64) new_hi) << 32) | (u64) new_lo;